In this notebook, a template is provided for you to implement your functionality in stages which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission, if necessary. Sections that begin with 'Implementation' in the header indicate where you should begin your implementation for your project. Note that some sections of implementation are optional, and will be marked with 'Optional' in the header.
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a 'Question' header. Carefully read each question and provide thorough answers in the following text boxes that begin with 'Answer:'. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
Note: Code and Markdown cells can be executed using the Shift + Enter keyboard shortcut. In addition, Markdown cells can typically be edited by double-clicking the cell to enter edit mode.
### Basic notebook setup and utilities.
# Whether to run functions that demonstrate parts of the project.
# Set to False to skip all demo cells when re-running the notebook.
__demos__ = True
class Attributes(object):
    r'''Dynamic collection of named attributes.

    Any attribute may be assigned freely; assignments are stored on the
    instance exactly as given.
    '''
    def __setattr__(self, name, value):
        # Delegate straight to the default object machinery.
        object.__setattr__(self, name, value)
Visualize the German Traffic Signs Dataset. This is open ended, some suggestions include: plotting traffic signs images, plotting the count of each sign, etc. Be creative!
The pickled data is a dictionary with 4 key/value pairs:
### Basic dataset manipulation classes.
import pickle
from collections import defaultdict
from glob import iglob
from os.path import isdir
from os.path import join as join_path
from re import search
import numpy as np
from scipy.misc import imread
class Data(object):
    r'''Base class for single-type data collection classes.

    Wraps an array-like `data` object, forwarding indexing and length
    queries to it.
    '''
    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]

    @property
    def shape(self):
        r'''Shape of the underlying array.'''
        return self.data.shape

    def extend(self, data):
        r'''Append the given entries to the end of the collection.'''
        self.data = np.concatenate([self.data, data])
class Images(Data):
    r'''Collection of RGB images, represented as 3D arrays of unsigned
    8-bit integers.
    '''
    def __init__(self, data):
        Data.__init__(self, data)

    def image(self, index):
        r'''Return the indexed entry as a displayable image.'''
        return self[index]
class Labels(Data):
    r'''Collection of class labels, represented as integer values.
    '''
    def __init__(self, data, breadth):
        r'''Create a label collection.

        If `breadth` is None, the number of classes is inferred from the
        distinct labels actually present in `data`.
        '''
        Data.__init__(self, data)
        self.breadth = breadth
        # `is None` instead of `== None`: identity test is the Python idiom
        # and avoids surprises with overloaded equality (e.g. numpy arrays).
        if breadth is None:
            classes = set(self.classof(i) for i in range(len(self)))
            self.breadth = len(classes)

    def classof(self, index):
        r'''Return the class label of the indexed entry.'''
        return self[index]

    @property
    def classes(self):
        r'''Map each class id in [0, breadth) to the list of entry indexes
        belonging to it. Classes with no entries map to empty lists.
        '''
        classes = dict((c, []) for c in range(self.breadth))
        for i in range(len(self)):
            k = self.classof(i)
            classes[k].append(i)
        return classes
class Dataset(object):
    r'''A collection of data cases and associated class identifiers.
    '''
    def __init__(self, *args, **kwargs):
        r'''Create a new dataset instance.

        Datasets can be loaded from files, for example:

            dataset = Dataset('title', 'path/to/pickled_file.p')

        They can also be created from other datasets:

            dataset = Dataset(other_dataset)

        or

            dataset = Dataset(title, X, y)

        Raises ValueError if the argument list matches none of the above.
        '''
        if len(args) == 1:
            self.__assign(args[0])
            return
        self.title = args[0]
        if len(args) == 2:
            (X, y) = self.__load(args[1])
            self.X = Images(X)
            self.y = Labels(y, kwargs.get('breadth'))
        elif len(args) == 3:
            self.X = args[1]
            self.y = args[2]
        else:
            # ValueError is a subclass of Exception, so existing handlers
            # still catch it, while new callers can be more specific.
            raise ValueError('Invalid argument list: %s' % str(args))

    def __assign(self, dataset):
        # Shallow-copy another dataset's attributes into this instance.
        self.title = dataset.title
        self.X = dataset.X
        self.y = dataset.y

    def __load(self, path):
        # A directory is scanned for image files named '<label>_<seq>.*';
        # anything else is assumed to be a pickled {'features', 'labels'} dict.
        if isdir(path):
            X = []
            y = []
            for filename in iglob(join_path(path, '*')):
                match = search(r'(\d+)_\d+\.', filename)
                if match is not None:  # was `!= None`
                    image = imread(filename)
                    label = int(match.group(1))
                    X.append(image)
                    y.append(label)
            return (np.array(X), np.array(y))
        with open(path, mode='rb') as data:
            dataset = pickle.load(data)
        return (dataset['features'], dataset['labels'])

    def __len__(self):
        return len(self.X)

    def __str__(self):
        template = (
            '%s dataset\n'
            'Number of entries: %d\n'
            'Input shape: %s\n'
            'Output shape: %s\n'
            'Number of classes: %d\n'
        )
        return template % (
            self.title,
            len(self),
            str(self.X.shape[1:]),
            str(self.y.shape[1:]),
            self.y.breadth
        )
def Parameters():
    r'''Create a new set of parameters for a neural network training problem.

    Returns an attribute collection with nested groups for the input,
    output, patch and batch settings.
    '''
    parameters = Attributes()
    for group in ('inputs', 'output', 'patch', 'batch'):
        setattr(parameters, group, Attributes())
    return parameters
def load_datasets():
    r'''Load the pickled training and test datasets from disk.

    The test dataset reuses the class breadth inferred from the training
    dataset, so both agree on the number of classes.
    '''
    data = Attributes()
    data.train = Dataset('Train', 'datasets/pickled/train.p')
    breadth = data.train.y.breadth
    data.test = Dataset('Test', 'datasets/pickled/test.p', breadth=breadth)
    return data
def print_datasets(data):
    r'''Print every dataset attached to the given attribute collection.'''
    for entry in vars(data).values():
        print(entry)
### To start off let's do a basic data summary.
# Demo: print entry counts and shapes for the train and test datasets.
if __demos__ == True:
    print_datasets(load_datasets())
### Data visualization facilities.
import csv
from random import sample
from matplotlib import pyplot as plt
from matplotlib import cm
# Widen the page region used for plots
plt.rcParams['figure.figsize'] = (9.0, 4.0) # Original: (6.0, 4.0)
class Displayer(object):
    r'''Displays per-class sample images from a dataset, with the
    human-readable sign names loaded from a CSV mapping file.
    '''
    def __init__(self, path):
        # `path` points to a CSV file mapping class ids to sign names,
        # with a single header row.
        with open(path) as csv_file:
            csv_data = csv.reader(csv_file)
            next(csv_data) # Discard column title row
            self.labels = dict((int(row[0]), str(row[1])) for row in csv_data)

    def __call__(self, dataset, width=5):
        r'''Display up to `width` samples of each class from the given dataset.
        '''
        n = len(dataset)
        print('\n\n %s' % ('-' * 80))
        print(' %s dataset (total %d entries)' % (dataset.title, n))
        print(' %s\n\n' % ('-' * 80))
        labels = self.labels
        classes = dataset.y.classes
        for c in range(len(classes)):
            indexes = classes[c]
            n = len(indexes)
            if n == 0:
                # Classes with no entries in this dataset are skipped.
                continue
            print(' Class %d ("%s", total %d entries) samples:' % (c, labels[c], len(indexes)))
            # Pick at most `width` random samples of this class.
            k = min(width, n)
            s = sample(indexes, k)
            self.display_signs(dataset, s, width)

    def display_signs(self, dataset, indexes, width):
        r'''Display the indexed sign images and corresponding labels side by side.
        '''
        n = len(indexes)
        for i in range(n):
            # One grid cell per sample, all laid out on a single row.
            plotter = plt.subplot2grid((1, width), (0, i))
            self.display_sign(plotter, dataset, indexes[i])
        plt.tight_layout()
        plt.show()

    def display_sign(self, plotter, dataset, i):
        r'''Display a sign image and corresponding numeric label.
        '''
        plotter.imshow(dataset.X.image(i))
        # Axis ticks carry no information for image plots; hide them.
        plotter.xaxis.set_visible(False)
        plotter.yaxis.set_visible(False)
        plotter.title.set_text(str(i))
# Displayer instance shared by the demo cells below.
display = Displayer('datasets/signnames.csv')

def display_datasets(data):
    # Show per-class samples from both the training and test datasets.
    display(data.train)
    display(data.test)

### Display summary statistics and samples for the training and test datasets.
if __demos__ == True:
    display_datasets(load_datasets())
Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the German Traffic Sign Dataset.
There are various aspects to consider when thinking about this problem:
Here is an example of a published baseline model on this problem. It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these.
Use the code cell (or multiple code cells, if necessary) to implement the first step of your project. Once you have completed your implementation and are satisfied with the results, be sure to thoroughly answer the questions that follow.
### Data preprocessing facilities.
from itertools import product
class Tensors(Images):
    r'''Collection of multi-dimensional vectors.
    '''
    def __init__(self, values):
        # Store as 32-bit floats, the input type expected by the network.
        Images.__init__(self, values.astype(np.float32))

    def image(self, index):
        r'''Reconstruct a displayable uint8 image from the float tensor.

        Each channel is independently rescaled to span [0, 255], undoing
        any normalization previously applied to the data.
        '''
        x = self[index]
        image = np.zeros(x.shape, dtype=np.uint8)
        # Generalized from a hard-coded 3 channels to the actual depth.
        for d in range(x.shape[-1]):
            channel = np.array(x[:, :, d])
            channel -= channel.min()
            peak = channel.max()
            if peak > 0:  # guard: a flat channel would divide by zero
                channel *= (255.0 / peak)
            image[:, :, d] = channel.astype(np.uint8)
        return image
class Likelihoods(Labels):
    r'''Collection of vectors indicating the likelihoods an input belongs
    to each of a set of classes.
    '''
    def __init__(self, data, breadth):
        Labels.__init__(self, data, breadth)
        # A 1D input holds plain label indexes: expand to one-hot rows.
        if data.ndim == 1:
            one_hot = (np.arange(self.breadth) == data[:, None])
            self.data = one_hot.astype(np.float32)

    def classof(self, index):
        r'''Return the most likely class for the indexed entry.'''
        return np.argmax(self[index])
class Vectorized(Dataset):
    r'''A dataset where both inputs and outputs are represented as
    floating-point arrays.
    '''
    def __init__(self, *args, **kwargs):
        Dataset.__init__(self, *args, **kwargs)
        (X, y) = (self.X, self.y)
        # Re-wrap the underlying arrays in their floating-point containers.
        self.X = Tensors(X.data)
        self.y = Likelihoods(y.data, y.breadth)
class Normalized(Dataset):
    r'''A dataset with inputs normalized to zero mean and unit deviation.

    Normalization is applied in place, one channel of one image at a time.
    NOTE(review): assumes X entries are float arrays (see Tensors); integer
    inputs would truncate under in-place division — confirm at call sites.
    '''
    def __init__(self, *args, **kwargs):
        Dataset.__init__(self, *args, **kwargs)
        X = self.X
        n = X.shape[0]
        d = X.shape[-1]
        for (i, j) in product(range(n), range(d)):
            channel = X[i, :, :, j]
            channel -= channel.mean()
            deviation = channel.std()
            if deviation > 0:  # guard: flat channels would divide by zero
                channel /= deviation
def load_vectorized():
    r'''Load the datasets and convert both to floating-point form.'''
    data = load_datasets()
    for name in ('train', 'test'):
        setattr(data, name, Vectorized(getattr(data, name)))
    return data
def load_normalized():
    r'''Load the datasets in vectorized, channel-normalized form.'''
    data = load_vectorized()
    for name in ('train', 'test'):
        setattr(data, name, Normalized(getattr(data, name)))
    return data
# Preprocess training and test datasets.
# Demo: confirm normalization preserves dataset shapes and entry counts.
if __demos__ == True:
    print_datasets(load_normalized())
Describe the techniques used to preprocess the data.
Answer:
In order to ensure numeric stability and make the associated optimization problem well-conditioned [^], inputs are normalized to zero mean and unit standard deviation. This is done simply by converting images to floating-point multidimensional arrays and then normalizing each channel separately. Outputs are also preprocessed here, converted from integer label indicators to one-hot encoded vectors, for use later in training.
### Separate a validation subset from the training dataset.
from random import choice, triangular, shuffle
from skimage.transform import warp, AffineTransform
from sklearn.cross_validation import train_test_split
def jiggle(image, **kwargs):
    r'''Perform slight random changes to the input array.

    Input images are warped by a random affine transform: translated by up
    to `r_dist` pixels on each axis, rotated and sheared by up to `r_degs`
    degrees, and scaled by up to `r_scale` around 1.0. All random draws use
    a triangular distribution centered on "no change".
    '''
    r_dist = kwargs.get('r_dist', 5)
    r_degs = kwargs.get('r_degs', 5)
    r_scale = kwargs.get('r_scale', 0.1)
    tx = int(triangular(-r_dist, r_dist))
    ty = int(triangular(-r_dist, r_dist))
    sl = triangular(1.0 - r_scale, 1.0 + r_scale)
    # Convert the rotation/shear bound from degrees to radians.
    r_rads = np.pi * r_degs / 180.0
    t_rotate = triangular(-r_rads, r_rads)
    t_shear = triangular(-r_rads, r_rads)
    shear = AffineTransform(
        rotation=t_rotate,
        shear=t_shear,
        translation=(tx, ty),
        scale=(sl, sl)
    )
    # 'wrap' fills pixels moved in from outside the image borders.
    return warp(image, shear, mode='wrap')
def padding(dataset, **kwargs):
    r'''Generate a dataset with randomly modified data items from the source dataset.

    Items are generated in inverse class distribution relative to the source dataset,
    so that no items are generated for the most numerous class, and the least numerous
    is given the most items.

    Keyword arguments:
    n_class -- target case count per class (default: size of the largest class)
    all_jiggled -- when True, emit only jiggled copies and no originals
    Remaining keyword arguments are forwarded to `jiggle`.
    '''
    classes = dataset.y.classes.values()
    X = dataset.X
    y = dataset.y
    n_class = kwargs.get('n_class')
    if n_class == None:
        n_class = np.max([len(cases) for cases in classes])
    Xp = []
    yp = []
    for cases in classes:
        n = len(cases)
        if kwargs.get('all_jiggled') == True:
            # Pretend the class is empty so every emitted case is jiggled.
            n = 0
        else:
            # Keep originals, downsampling classes larger than the target.
            k = (sample(cases, n_class) if n_class <= n else cases)
            Xp.extend(X[k])
            yp.extend(y[k])
        for i in range(n_class - n):
            # Top up the class with jiggled copies of random members.
            k = choice(cases)
            Xp.append(jiggle(X[k], **kwargs))
            yp.append(y[k])
    # Rebuild using the same container types as the source dataset.
    DataX = X.__class__
    DataY = y.__class__
    breadth = dataset.y.breadth
    return Dataset('Padding', DataX(np.array(Xp)), DataY(np.array(yp), breadth))
class Padded(Dataset):
    r'''A dataset where all classes are guaranteed to have the same case count.

    If some classes in the original dataset contain more cases than others,
    extra cases are generated through random transformations, until all
    classes have the same number of cases.
    '''
    def __init__(self, *args, **kwargs):
        Dataset.__init__(self, *args, **kwargs)
        padded = padding(self, **kwargs)
        (self.X, self.y) = (padded.X, padded.y)
def split(dataset, rate=0.25):
    r'''Split a dataset into training and validation parts.

    `rate` is the fraction of cases set aside for validation. A fixed
    random seed keeps the split reproducible across runs.
    '''
    (X, y) = (dataset.X, dataset.y)
    parts = train_test_split(X.data, y.data, test_size=rate, random_state=832289)
    (X_train, X_valid, y_train, y_valid) = parts
    breadth = dataset.y.breadth
    # Re-wrap the split arrays in the source dataset's container types.
    DataX = X.__class__
    DataY = y.__class__
    train = Dataset('Train', DataX(X_train), DataY(y_train, breadth))
    valid = Dataset('Validate', DataX(X_valid), DataY(y_valid, breadth))
    return (train, valid)
def split_train(data):
    r'''Replace the training dataset with its (train, validation) split.'''
    (train, valid) = split(data.train)
    data.train = train
    data.valid = valid
    return data
# Demo: summarize the train/validation split and show jiggled samples.
if __demos__ == True:
    print_datasets(split_train(load_datasets()))
    display(padding(load_datasets().train, all_jiggled=True, n_class=5))
Answer:
Traffic signs are not equally represented in the training data set, with some appearing much more often than others. Such imbalance would make training statistics less reliable; this issue is addressed here through the generation of jiggled duplicates of sign images from the less frequent classes. Jiggling involves rotating, translating, shearing and scaling images by random amounts within a restricted range. Once the classes are padded to the same size by the addition of jiggled images, a validation set is separated from the training set by randomly setting aside 25% of training cases. Both training and validation datasets are then shuffled.
### Define your architecture here.
### Feel free to use as many code cells as needed.
import tensorflow as tf
def weights(*shape):
    r'''Create a weight variable with truncated-normal initialization.'''
    initial = tf.truncated_normal(list(shape), stddev=0.1)
    return tf.Variable(initial)
def bias(b, n):
    r'''Create a bias variable of length `n`, initialized to constant `b`.'''
    initial = tf.constant(b, shape=[n])
    return tf.Variable(initial)
def layer_depth(layer):
    r'''Return the size of the layer's last (depth) dimension.'''
    dimensions = layer.get_shape().as_list()
    return dimensions[-1]
def layer_2d(layer):
    r'''Flatten a layer to 2D, preserving the (implicit) batch dimension.'''
    flattened = layer.get_shape()[1:].num_elements()
    return tf.reshape(layer, [-1, flattened])
def layer_nl(layer):
    r'''Apply the network's non-linearity (ReLU) to a layer.'''
    activated = tf.nn.relu(layer)
    return activated
def layer_connected(layer, b, n):
    r'''Append a fully-connected layer of `n` nodes with constant bias `b`.'''
    m = layer_depth(layer)
    W = weights(m, n)
    return tf.matmul(layer, W) + bias(b, n)
def layer_convolved(layer, side, depth, stride=1, padding='SAME', b=None):
    r'''Append a `side`x`side` convolution producing `depth` output channels.

    If `b` is None the bias is drawn from the truncated-normal weight
    initializer; otherwise it is a constant initialized to `b`.
    '''
    strides = [1, stride, stride, 1]
    W = weights(side, side, layer_depth(layer), depth)
    C = tf.nn.conv2d(layer, W, strides, padding)
    # `is None` instead of `== None`: identity test is idiomatic and avoids
    # accidental operator overloading if a tensor is ever passed as `b`.
    B = weights(depth) if b is None else bias(b, depth)
    return C + B
def layer_max_pool(layer, ksize, stride=None, padding='SAME'):
    r'''Append a `ksize`x`ksize` max-pooling layer.

    When `stride` is not given it defaults to `ksize` (non-overlapping
    pooling windows).
    '''
    if stride is None:  # was `== None`; identity test is the Python idiom
        stride = ksize
    return tf.nn.max_pool(layer, [1, ksize, ksize, 1], [1, stride, stride, 1], padding)
# Architecture and parameters taken from:
# https://hackathonprojects.wordpress.com/2016/09/25/inception-modules-explained-and-implemented/
def inception_module(inputs, depth_1x1, depth_out, stride=1):
    r'''Build an inception module over `inputs`.

    Four parallel paths (1x1, 3x3 and 5x5 convolutions, plus a 3x3 max-pool)
    are concatenated along the depth axis; the 3x3 and 5x5 paths first pass
    through a 1x1 dimensionality-reduction convolution of depth `depth_1x1`.
    '''
    def conv_1x1(layer=inputs, depth=depth_out, stride=stride):
        return layer_convolved(layer, 1, depth, stride)
    def conv_3x3():
        # Reduce depth before the more expensive 3x3 convolution.
        reduced = layer_nl(conv_1x1(inputs, depth_1x1, 1))
        return layer_convolved(reduced, 3, depth_out, stride)
    def conv_5x5():
        # Reduce depth before the more expensive 5x5 convolution.
        reduced = layer_nl(conv_1x1(inputs, depth_1x1, 1))
        return layer_convolved(reduced, 5, depth_out, stride)
    def max_pool():
        pooled = tf.nn.max_pool(inputs, [1, 3, 3, 1], [1, 1, 1, 1], 'SAME')
        return conv_1x1(pooled, depth_out)
    # Concatenate all paths on the depth dimension (axis 3 in TF 0.x API).
    return layer_nl(tf.concat(3, [conv_1x1(), conv_3x3(), conv_5x5(), max_pool()]))
def mnist_architecture(inputs, parameters):
    r'''Two VALID-padded convolutions followed by two connected layers.'''
    side = parameters.patch.side
    depth = parameters.patch.depth
    stride = parameters.stride
    # Stacked feature extractors, each followed by a ReLU.
    features = layer_convolved(inputs, side, depth, stride, padding='VALID')
    features = layer_convolved(layer_nl(features), side, depth, stride, padding='VALID')
    features = layer_nl(features)
    # Two-layer fully-connected classifier head.
    hidden = layer_connected(layer_2d(features), 1.0, parameters.hidden_nodes)
    return layer_connected(layer_nl(hidden), 1.0, parameters.output.classes)
def mnist_architecture_2(inputs, parameters):
    r'''Max-pool plus convolution pairs followed by two connected layers.'''
    side = parameters.patch.side
    depth = parameters.patch.depth
    stride = parameters.stride
    features = inputs
    # Two rounds of pooling, each followed by a ReLU-activated convolution.
    for _ in range(2):
        features = layer_max_pool(features, stride)
        features = layer_nl(layer_convolved(features, side, depth))
    # Two-layer fully-connected classifier head.
    hidden = layer_connected(layer_2d(features), 1.0, parameters.hidden_nodes)
    return layer_connected(layer_nl(hidden), 1.0, parameters.output.classes)
def mini_inception_architecture(inputs, parameters):
    r'''Two compacted inception modules followed by two connected layers.

    Unlike the full inception module, each module's concatenated output is
    compacted back to `depth` channels by a trailing 1x1 convolution,
    keeping training costs down.
    '''
    depth = parameters.patch.depth
    stride = parameters.stride
    n_hidden = parameters.hidden_nodes
    n_classes = parameters.output.classes
    def mini_inception_module(layer):
        # Parallel convolution and pooling paths, concatenated on depth.
        conv_1x1 = layer_convolved(layer, 1, depth, stride)
        conv_3x3 = layer_convolved(layer, 3, depth, stride)
        conv_5x5 = layer_convolved(layer, 5, depth, stride)
        max_pool = layer_max_pool(layer, 3, stride)
        inception = layer_nl(tf.concat(3, [conv_1x1, conv_3x3, conv_5x5, max_pool]))
        # Compact the 4*depth concatenation back down to `depth` channels.
        return layer_nl(layer_convolved(inception, 1, depth))
    layer = mini_inception_module(inputs)
    layer = mini_inception_module(layer)
    layer = layer_connected(layer_2d(layer), 1.0, n_hidden)
    layer = layer_connected(layer_nl(layer), 1.0, n_classes)
    return layer
def inception_architecture(inputs, parameters):
    r'''Two stacked inception modules followed by two connected layers.'''
    depth = parameters.patch.depth
    # The 1x1 reduction paths use half the output depth.
    reduced = depth // 2
    features = inception_module(inputs, reduced, depth, parameters.stride)
    features = inception_module(features, reduced, depth, parameters.stride)
    hidden = layer_connected(layer_2d(features), 1.0, parameters.hidden_nodes)
    return layer_connected(layer_nl(hidden), 1.0, parameters.output.classes)
class Network(object):
    r'''A TensorFlow graph built from an architecture, plus its own session.
    '''
    def __init__(self, architecture, parameters):
        l_input = parameters.inputs.side
        d_input = parameters.inputs.depth
        with tf.Graph().as_default(): # ensures variable names are consistent across network instances
            self.inputs = tf.placeholder(tf.float32, shape=(None, l_input, l_input, d_input))
            self.outputs = architecture(self.inputs, parameters)
            # Created inside the `with` so the session binds to this graph.
            self.session = tf.Session()

    def __call__(self, X):
        r'''Run a forward pass, returning the raw (pre-softmax) outputs.'''
        return self.session.run(self.outputs, feed_dict={self.inputs: X})

    def __repr__(self):
        # Recursively describe the output tensor and all its input tensors.
        def repr_layers(layer):
            return [repr(layer)] + sum((repr_layers(child) for child in layer.op.inputs), [])
        return '\n'.join(repr_layers(self.outputs))

    def init_variables(self):
        r'''Initialize all variables inside this network's own graph.'''
        with self.session.graph.as_default():
            init = tf.initialize_all_variables()
            self.session.run(init)
def default_parameters():
    r'''Return the baseline hyperparameter set used across the demos.'''
    parameters = Parameters()
    # Training settings.
    parameters.stride = 1
    parameters.hidden_nodes = 64
    parameters.learning_rate = 0.1
    parameters.epochs = 5
    # Input geometry: 32x32 RGB images, 43 sign classes.
    parameters.inputs.side = 32
    parameters.inputs.depth = 3
    parameters.output.classes = 43
    # Convolution patch and batching settings.
    parameters.patch.side = 5
    parameters.patch.depth = 32
    parameters.batch.size = 50
    parameters.batch.step = 10
    return parameters
def default_network():
    r'''Instantiate the default (mini-inception) network.'''
    parameters = default_parameters()
    return Network(mini_inception_architecture, parameters)
# Demo: build the default network and print its layer structure.
if __demos__ == True:
    network = default_network()
    print(repr(network))
What does your final architecture look like? (Type of model, layers, sizes, connectivity, etc.) For reference on how to build a deep neural network using TensorFlow, see Deep Neural Network in TensorFlow from the classroom.
Answer:
The basic architecture is composed of a stacked pair of feature extractor modules, followed by a two-layer fully-connected network, with Rectified Linear Units (ReLU's) placed between each level of the network:
Network inputs are composed of K images of width W, height H and D channels. Feature extractors produce feature maps of F channels from each image (or alternatively, lower-level feature maps); those are then fed to a "hidden" fully-connected layer, which outputs one-dimensional vectors of dimension E. A second fully-connected layer converts those vectors to dimension C, the number of classes in the classification problem. Network outputs are K vectors of dimension C; for a list O of output vectors, O[k][c] represents how strongly input image k is predicted to relate to problem class c. For the traffic sign classification problem, we have W = H = 32, D = 3 and C = 43.
The exact nature of the feature extractors went through a few revisions over the course of my work. The first version was just a 5x5 convolutional layer of usual composition, that is, a stack of convolutions added to a bias:
Furthermore the stride of the convolutional layers was set to 2, effectively halving W and H at the output of each module. Initially F = 16 and E = 32, but soon I found that by raising F to 32 and E to 64, training results were significantly improved. I also found that changing convolution padding from SAME to VALID (which further reduced the width and height of feature maps) improved training times without significantly impacting recognition results. Removing the stride didn't seem to make much of a difference, however; a variation of this network with stride 1 convolution layers preceded by a 2x2, stride 2 max-pool layer produced slightly worse results.
Having been told about inception modules in the online lessons, and seeing them mentioned again in the course's forums, I decided to give them a try. I implemented the version described on this page, which are of the following form:
Where the 1x1 convolutions before the 3x3 and 5x5 layers are dimensionality reduction filters: for R < F, they produce compacted feature maps that are faster to process. In my experiments I set R = F/2. However I found that even with this trick, training was too slow on my machine for practical experimentation with the full dataset, while results with a restricted dataset didn't seem to improve much on the previous architecture.
Therefore I compromised, implementing a variation of the inception module that would compact results from all module layers on output:
This proved an improvement relative to my initial architecture, however it was still too slow for thorough hyperparameter evaluation in the time I had left; so other than setting stride to 1 and padding to SAME (which seemed to produce better results) I kept them as they were.
### Train your model here.
### Feel free to use as many code cells as needed.
from math import ceil, floor
from sys import maxsize
from tqdm import tqdm
def batches(dataset, l, counter=range):
    r'''Yield `(index, X_batch, y_batch)` slices of size `l` from a dataset.

    `counter` may be any range-like callable (e.g. a tqdm wrapper) used to
    iterate over the batch indexes. The final batch may be smaller than `l`.
    '''
    (X, y) = (dataset.X, dataset.y)
    # Round up so a final, smaller "rest" batch is issued when needed.
    count = int(ceil(len(X) / l))
    for index in counter(count):
        start = index * l
        stop = start + l
        yield (index, X[start:stop], y[start:stop])
class Accuracy(object):
    r'''Measures a network's prediction accuracy over fixed datasets.
    '''
    def __init__(self, network, batch_size, *datasets):
        self.inputs = network.inputs
        self.session = network.session
        with network.session.graph.as_default():
            # Placeholder for the one-hot expected outputs.
            self.outputs = tf.placeholder(tf.float32)
            # A prediction is correct when the argmax of the network output
            # matches the argmax of the expected one-hot vector.
            is_correct_prediction = tf.equal(tf.argmax(network.outputs, 1), tf.argmax(self.outputs, 1))
            # Sum (not mean) so partial final batches are weighted correctly.
            self.accuracy = tf.reduce_sum(tf.cast(is_correct_prediction, tf.float32))
        self.datasets = datasets
        self.batch_size = batch_size

    def __call__(self):
        r'''Return a tuple with the accuracy over each tracked dataset.'''
        def accuracy(dataset):
            total = 0.0
            count = 0.0
            for (k, X_k, y_k) in batches(dataset, self.batch_size):
                data = {self.inputs: X_k, self.outputs: y_k}
                total += self.session.run(self.accuracy, feed_dict=data)
                count += len(X_k)
            return total / count
        return tuple(accuracy(dataset) for dataset in self.datasets)
class Optimizer(object):
    r'''Runs gradient-descent optimization steps against a network.
    '''
    def __init__(self, network, learning_rate):
        self.inputs = network.inputs
        self.session = network.session
        with self.session.graph.as_default():
            # Placeholder for the one-hot expected outputs.
            self.outputs = tf.placeholder(tf.float32)
            # Softmax cross-entropy over the network's raw logits.
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(network.outputs, self.outputs)
            self.loss = tf.reduce_mean(cross_entropy)
            self.optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(self.loss)
            # TODO: further experiment with AdamOptimizer
            # https://www.tensorflow.org/versions/r0.11/api_docs/python/train.html#AdamOptimizer
            #self.optimizer = tf.train.AdamOptimizer(1e-2, beta1=0.99, epsilon=0.1).minimize(self.loss)
            # TODO: further experiment with TensorBoard
            #tf.scalar_summary('loss', self.loss)
            #self.summaries = tf.merge_all_summaries()
            #self.writer = tf.train.SummaryWriter('logs', self.session.graph)

    def __call__(self, X, y):
        r'''Run one optimization step on the given batch; return the loss.'''
        data = {self.inputs: X, self.outputs: y}
        (_, l) = self.session.run([self.optimizer, self.loss], feed_dict=data)
        return l
        # TODO: further experiment with TensorBoard
        #(_, s, l) = self.session.run([self.optimizer, self.summaries, self.loss], feed_dict=data)
        #self.writer.add_summary(s)
        #return l
def plot_lines(title, x, *ys, **kwargs):
    r'''Plot one or more labeled lines over a shared horizontal axis.

    Each entry of `ys` is a `(values, color, caption)` triple. The legend
    location can be overridden through the `loc` keyword argument.
    '''
    (y_min, y_max) = (maxsize, -maxsize)
    plotter = plt.subplot(111)
    for (values, color, caption) in ys:
        label = '%s (last: %.3f)' % (caption, round(values[-1], 3))
        plotter.plot(x, values, color, label=label)
        # Expand the vertical range to whole units covering every line.
        y_min = min(floor(np.min(values)), y_min)
        y_max = max(ceil(np.max(values)), y_max)
    plotter.set_title(title)
    plotter.set_xlim([x[0], x[-1]])
    plotter.set_ylim([y_min, y_max])
    plotter.legend(loc=kwargs.get('loc', 1))
    plt.tight_layout()
    plt.show()
def train(network, data, parameters):
    r'''Run minibatch gradient-descent training, plotting progress at the end.

    Loss and train/validation accuracies are sampled every `batch.step`
    batches and plotted once all epochs complete.
    '''
    batch_size = parameters.batch.size
    batch_step = parameters.batch.step
    learning_rate = parameters.learning_rate
    epochs = parameters.epochs
    accuracy = Accuracy(network, parameters.batch.size, data.train, data.valid)
    optimizer = Optimizer(network, learning_rate)
    session = network.session
    # Sampled statistics collected for the final plots.
    x_batch = []
    y_train = []
    y_valid = []
    y_loss = []
    network.init_variables()
    for i in range(epochs):
        # tqdm-backed counter displays a per-epoch progress bar.
        counter = lambda n: tqdm(range(n), desc='Epoch {:>2}/{}'.format(i + 1, epochs), unit='batches')
        for (k, X_k, y_k) in batches(data.train, batch_size, counter):
            loss = optimizer(X_k, y_k)
            if k % batch_step == 0:
                # Sample statistics at regular batch intervals.
                (a_train, a_valid) = accuracy()
                x_batch.append(len(x_batch) * batch_step)
                y_train.append(a_train)
                y_valid.append(a_valid)
                y_loss.append(loss)
    plot_lines('Loss', x_batch,
        (y_loss, 'g', 'Loss')
    )
    plot_lines('Accuracy', x_batch,
        (y_train, 'r', 'Training Accuracy'),
        (y_valid, 'b', 'Validation Accuracy'),
        loc=4
    )
def load(network, path):
    r'''Restore trained variables into the network from a checkpoint file.'''
    session = network.session
    with session.graph.as_default():
        tf.train.Saver().restore(session, path)
def save(network, path):
    r'''Persist the network's trained variables to a checkpoint file.'''
    session = network.session
    with session.graph.as_default():
        tf.train.Saver().save(session, path)
def trained_network():
    r'''Load, pad, normalize and split the data, then train a new network.

    The trained network is checkpointed to 'network.chk' before returning.
    '''
    data = Attributes()
    # Pad every class to 2400 cases with jiggled copies before normalizing.
    data.train = Normalized(Vectorized(Padded('Train', 'datasets/pickled/train.p', n_class=2400)))
    #data.train = Normalized(Vectorized('Train', 'datasets/pickled/train.p'))
    (data.train, data.valid) = split(data.train)
    parameters = default_parameters()
    # Derive geometry from the actual data rather than trusting defaults.
    parameters.inputs.side = data.train.X.shape[1]
    parameters.output.classes = data.train.y.breadth
    network = Network(mini_inception_architecture, parameters)
    train(network, data, parameters)
    save(network, 'network.chk')
    return network

network = trained_network()
def run_test():
    r'''Evaluate the checkpointed network against the full test dataset.'''
    data_test = Normalized(Vectorized('Test', 'datasets/pickled/test.p'))
    # Rebuild the graph with default geometry, then restore trained weights.
    network = Network(mini_inception_architecture, default_parameters())
    load(network, 'network.chk')
    accuracy = Accuracy(network, 50, data_test)
    print('Test performance: %.3f' % round(accuracy()[0], 3))

run_test()
How did you train your model? (Type of optimizer, batch size, epochs, hyperparameters, etc.)
Answer:
I experimented with the Adam Optimizer, but after much fiddling with parameters, I couldn't get any especially good results out of it; I also had several instances of optimization getting stuck at local minima, and even some NaN occurrences. So pending a more thorough evaluation of the method, I reverted to gradient descent, which proved reliable and effective enough for my purposes.
After much experimentation (see discussion on the next question), hyperparameters were set to the following values:
Hyperparameters, including batch size and number of epochs, were mostly taken from other Udacity notebooks, notably the Tensor Flow Lab and the Convolutional Networks notebook from the Deep Learning course. With few exceptions (e.g. the number of hidden nodes was chosen as 64 rather than the more common 32) I found the parameters specified in those notebooks to be very close to optimal.
What approach did you take in coming up with a solution to this problem?
Answer:
First I implemented a basic system that would go through all the steps of loading and preparing the data, instantiating the network, training it and presenting results. Once this initial version was in place, I went about improving it piece by piece, refining data management, experimenting with different network architectures, and varying parameters. Because of the importance of data for deep learning, I gave special attention to developing solutions for data preprocessing and selection, including automatic generation of new samples from preexisting data. I stopped working when it became clear that any further improvement would take more time than I had available.
Hyperparameters (see list above) were determined by systematically varying (increasing or decreasing) the value of each parameter while keeping the others fixed, and verifying whether classification improved as a result. Initial values were taken from the previous Tensor Flow lab, where this same procedure was applied to a simpler problem. It was expected that those settings would provide a good starting point for further adjustments. In fact for most parameters, it proved hard to improve on the initial values, so they were left as-is.
Take several pictures of traffic signs that you find on the web or around you (at least five), and run them through your classifier on your computer to produce example results. The classifier might not recognize some local signs but it could prove interesting nonetheless.
You may find signnames.csv useful as it contains mappings from the class id (integer) to the actual sign name.
Use the code cell (or multiple code cells, if necessary) to implement the first step of your project. Once you have completed your implementation and are satisfied with the results, be sure to thoroughly answer the questions that follow.
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
def load_odd_images():
    r'''Load the externally captured sign images with known class breadth.'''
    path = 'datasets/images/'
    return Dataset('Odd Images', path, breadth=43)
def display_odd_images():
    # Show the captured images alongside their numeric indexes.
    odd_images = load_odd_images()
    display(odd_images)

# Demo: display the captured (out-of-dataset) images.
if __demos__ == True:
    display_odd_images()
Choose five candidate images of traffic signs and provide them in the report. Are there any particular qualities of the image(s) that might make classification difficult? It would be helpful to plot the images in the notebook.
Answer:
Some traffic signs in the selected images differ from dataset images in proportions (e.g. border thickness), but otherwise they shouldn't pose a challenge to the trained classifier. For one, lighting is overall much better than in the training set, with sharper contrast.
### Run the predictions here.
### Feel free to use as many code cells as needed.
class Argmax(object):
    r'''Computes the most likely class for each input fed to a network.
    '''
    def __init__(self, network):
        self.inputs = network.inputs
        self.session = network.session
        with self.session.graph.as_default():
            self.argmax = tf.argmax(network.outputs, 1)

    def __call__(self, X):
        r'''Return the predicted class index for each row of `X`.'''
        feed = {self.inputs: X}
        return self.session.run(self.argmax, feed_dict=feed)
def run_odd_images():
    r'''Predict classes for the captured images and print a comparison.'''
    odd_images = Normalized(Vectorized(load_odd_images()))
    # Rebuild the graph with default geometry, then restore trained weights.
    network = Network(mini_inception_architecture, default_parameters())
    load(network, 'network.chk')
    predict = Argmax(network)
    yp = predict(odd_images.X.data)
    for (k, yp_k) in enumerate(yp):
        y_k = odd_images.y.classof(k)
        print('Class of entry %d is %d, predicted as %d' % (k, y_k, yp_k))

run_odd_images()
Is your model able to perform equally well on captured pictures or a live camera stream when compared to testing on the dataset?
Answer:
The captured image set is very small, therefore it's dangerous to take categorical conclusions from it. That said, classification accuracy on the captured image set was 100%, compared to 91.6% in the test set. Considering how challenging the test set was compared to the captured set (images were often blurred or poorly lighted), this seems to suggest the current architecture could already be sufficient for flawless traffic sign classification, provided input images are clear enough. It does remain to be seen whether the architecture as currently implemented could keep pace with a live image stream, though.
### Visualize the softmax probabilities here.
### Feel free to use as many code cells as needed.
class SoftmaxTopK(object):
    r'''Computes the top `k` softmax probabilities for each network input.
    '''
    def __init__(self, network, k=5):
        self.inputs = network.inputs
        self.session = network.session
        with self.session.graph.as_default():
            self.outputs = tf.nn.top_k(tf.nn.softmax(network.outputs), k)

    def __call__(self, X):
        r'''Return a list of (indices, values) pairs, one per input row.'''
        result = self.session.run(self.outputs, feed_dict={self.inputs: X})
        return list(zip(result.indices, result.values))
def run_top_5():
    r'''Print the top-5 softmax predictions for each captured image.'''
    odd_images = Normalized(Vectorized(load_odd_images()))
    # Rebuild the graph with default geometry, then restore trained weights.
    network = Network(mini_inception_architecture, default_parameters())
    load(network, 'network.chk')
    predict = SoftmaxTopK(network)
    yp = predict(odd_images.X.data)
    for (k, (indices, values)) in enumerate(yp):
        y_k = odd_images.y.classof(k)
        # The generator's `k` shadows the loop index only inside the
        # generator expression (Python 3 scoping); the print below still
        # sees the outer entry index.
        yp_k = ', '.join(('(%d: %.4f)' % (k, v)) for (k, v) in zip(indices, values))
        print('Class of entry %d is %d, predictions were: %s' % (k, y_k, yp_k))

run_top_5()
Use the model's softmax probabilities to visualize the certainty of its predictions, tf.nn.top_k could prove helpful here. Which predictions is the model certain of? Uncertain? If the model was incorrect in its initial prediction, does the correct prediction appear in the top k? (k should be 5 at most)
Answer:
Predictions were fairly reliable for the example images used. The least certain result, the stop sign, was likely due to the image not being perfectly centered, however it was still able to predict the correct label with sufficient certainty.
If necessary, provide documentation for how an interface was built for your model to load and classify newly-acquired images.
Answer:
Because of the way the architecture was implemented, with object-oriented API's that build upon one another, adding new features to the system (in this case, to load and classify images acquired after the network was trained) was fairly straightforward: just a matter of writing a couple more wrappers to compute the desired values, and then running the appropriate callables.
Note: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to File -> Download as -> HTML (.html). Include the finished document along with this notebook as your submission.